static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
{
- physdev_op_t op;
+ struct physdev_apic apic_op;
int ret;
- op.cmd = PHYSDEVOP_APIC_READ;
- op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
- op.u.apic_op.reg = reg;
- ret = HYPERVISOR_physdev_op(&op);
+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
+ apic_op.reg = reg;
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
if (ret)
return ret;
- return op.u.apic_op.value;
+ return apic_op.value;
}
static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
- physdev_op_t op;
+ struct physdev_apic apic_op;
- op.cmd = PHYSDEVOP_APIC_WRITE;
- op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
- op.u.apic_op.reg = reg;
- op.u.apic_op.value = value;
- HYPERVISOR_physdev_op(&op);
+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
+ apic_op.reg = reg;
+ apic_op.value = value;
+ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
}
#define io_apic_read(a,r) xen_io_apic_read(a,r)
int assign_irq_vector(int irq)
{
- physdev_op_t op;
+ struct physdev_irq irq_op;
BUG_ON(irq >= NR_IRQ_VECTORS);
if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
return IO_APIC_VECTOR(irq);
- op.cmd = PHYSDEVOP_ASSIGN_VECTOR;
- op.u.irq_op.irq = irq;
- if (HYPERVISOR_physdev_op(&op))
+ irq_op.irq = irq;
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
return -ENOSPC;
- vector_irq[op.u.irq_op.vector] = irq;
+ vector_irq[irq_op.vector] = irq;
if (irq != AUTO_ASSIGN)
- IO_APIC_VECTOR(irq) = op.u.irq_op.vector;
+ IO_APIC_VECTOR(irq) = irq_op.vector;
- return op.u.irq_op.vector;
+ return irq_op.vector;
}
#ifndef CONFIG_XEN
{
struct thread_struct * t = &current->thread;
unsigned long *bitmap;
- physdev_op_t op;
+ struct physdev_set_iobitmap set_iobitmap;
if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
return -EINVAL;
memset(bitmap, 0xff, IO_BITMAP_BYTES);
t->io_bitmap_ptr = bitmap;
- op.cmd = PHYSDEVOP_SET_IOBITMAP;
- op.u.set_iobitmap.bitmap = (char *)bitmap;
- op.u.set_iobitmap.nr_ports = IO_BITMAP_BITS;
- HYPERVISOR_physdev_op(&op);
+ set_iobitmap.bitmap = (char *)bitmap;
+ set_iobitmap.nr_ports = IO_BITMAP_BITS;
+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
}
set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
/* The process may have allocated an io port bitmap... nuke it. */
if (unlikely(NULL != t->io_bitmap_ptr)) {
- physdev_op_t op = { 0 };
- op.cmd = PHYSDEVOP_SET_IOBITMAP;
- HYPERVISOR_physdev_op(&op);
+ struct physdev_set_iobitmap set_iobitmap = { 0 };
+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
kfree(t->io_bitmap_ptr);
t->io_bitmap_ptr = NULL;
}
#ifndef CONFIG_X86_NO_TSS
struct tss_struct *tss = &per_cpu(init_tss, cpu);
#endif
- physdev_op_t iopl_op, iobmp_op;
+ struct physdev_set_iopl iopl_op;
+ struct physdev_set_iobitmap iobmp_op;
multicall_entry_t _mcl[8], *mcl = _mcl;
/* XEN NOTE: FS/GS saved in switch_mm(), not here. */
#undef C
if (unlikely(prev->iopl != next->iopl)) {
- iopl_op.cmd = PHYSDEVOP_SET_IOPL;
- iopl_op.u.set_iopl.iopl = (next->iopl == 0) ? 1 :
- (next->iopl >> 12) & 3;
+ iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
mcl->op = __HYPERVISOR_physdev_op;
- mcl->args[0] = (unsigned long)&iopl_op;
+ mcl->args[0] = PHYSDEVOP_set_iopl;
+ mcl->args[1] = (unsigned long)&iopl_op;
mcl++;
}
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
- iobmp_op.cmd =
- PHYSDEVOP_SET_IOBITMAP;
- iobmp_op.u.set_iobitmap.bitmap =
- (char *)next->io_bitmap_ptr;
- iobmp_op.u.set_iobitmap.nr_ports =
- next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
+ iobmp_op.bitmap = (char *)next->io_bitmap_ptr;
+ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
mcl->op = __HYPERVISOR_physdev_op;
- mcl->args[0] = (unsigned long)&iobmp_op;
+ mcl->args[0] = PHYSDEVOP_set_iobitmap;
+ mcl->args[1] = (unsigned long)&iobmp_op;
mcl++;
}
void __init setup_arch(char **cmdline_p)
{
int i, j, k, fpp;
- physdev_op_t op;
+ struct physdev_set_iopl set_iopl;
unsigned long max_low_pfn;
/* Force a quick death if the kernel panics (not domain 0). */
if (efi_enabled)
efi_map_memmap();
- op.cmd = PHYSDEVOP_SET_IOPL;
- op.u.set_iopl.iopl = 1;
- HYPERVISOR_physdev_op(&op);
+ set_iopl.iopl = 1;
+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
#ifdef CONFIG_X86_IO_APIC
check_acpi_pci(); /* Checks more than just ACPI actually */
config XEN_INTERFACE_VERSION
hex
depends on XEN
- default 0x00030201
+ default 0x00030202
config SCHED_NO_NO_OMIT_FRAME_POINTER
bool
#include <asm/hypervisor.h>
static inline unsigned int xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
{
- physdev_op_t op;
+ struct physdev_apic apic_op;
int ret;
- op.cmd = PHYSDEVOP_APIC_READ;
- op.u.apic_op.apic_physbase = (unsigned long)iosapic -
+ apic_op.apic_physbase = (unsigned long)iosapic -
__IA64_UNCACHED_OFFSET;
- op.u.apic_op.reg = reg;
- ret = HYPERVISOR_physdev_op(&op);
+ apic_op.reg = reg;
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
if (ret)
return ret;
- return op.u.apic_op.value;
+ return apic_op.value;
}
static inline void xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
- physdev_op_t op;
+ struct physdev_apic apic_op;
- op.cmd = PHYSDEVOP_APIC_WRITE;
- op.u.apic_op.apic_physbase = (unsigned long)iosapic -
+ apic_op.apic_physbase = (unsigned long)iosapic -
__IA64_UNCACHED_OFFSET;
- op.u.apic_op.reg = reg;
- op.u.apic_op.value = val;
- HYPERVISOR_physdev_op(&op);
+ apic_op.reg = reg;
+ apic_op.value = val;
+ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
}
static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
int xen_assign_irq_vector(int irq)
{
- physdev_op_t op;
+ struct physdev_irq irq_op;
- op.cmd = PHYSDEVOP_ASSIGN_VECTOR;
- op.u.irq_op.irq = irq;
- if (HYPERVISOR_physdev_op(&op))
+ irq_op.irq = irq;
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
return -ENOSPC;
- return op.u.irq_op.vector;
+ return irq_op.vector;
}
#endif /* XEN */
#if 0 // FIXME: diverged from x86 evtchn.c
/* Slow path (hypercall) if this is a non-local port. */
if (unlikely(cpu != cpu_from_evtchn(port))) {
- evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
- .u.unmask.port = port };
- (void)HYPERVISOR_event_channel_op(&op);
+ struct evtchn_unmask op = { .port = port };
+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op);
return;
}
#endif
const char *devname,
void *dev_id)
{
- evtchn_op_t op;
+ struct evtchn_bind_virq bind_virq;
int evtchn;
spin_lock(&irq_mapping_update_lock);
- op.cmd = EVTCHNOP_bind_virq;
- op.u.bind_virq.virq = virq;
- op.u.bind_virq.vcpu = cpu;
- BUG_ON(HYPERVISOR_event_channel_op(&op) != 0 );
- evtchn = op.u.bind_virq.port;
+ bind_virq.virq = virq;
+ bind_virq.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0)
+ BUG();
+ evtchn = bind_virq.port;
if (!unbound_irq(evtchn)) {
evtchn = -EINVAL;
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
- evtchn_op_t op;
+ struct evtchn_close close;
int evtchn = evtchn_from_irq(irq);
spin_lock(&irq_mapping_update_lock);
if (unbound_irq(irq))
goto out;
- op.cmd = EVTCHNOP_close;
- op.u.close.port = evtchn;
- BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+ close.port = evtchn;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
switch (type_from_irq(irq)) {
case IRQT_VIRQ:
static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
{
- physdev_op_t op;
+ struct physdev_apic apic_op;
int ret;
- op.cmd = PHYSDEVOP_APIC_READ;
- op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
- op.u.apic_op.reg = reg;
- ret = HYPERVISOR_physdev_op(&op);
+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
+ apic_op.reg = reg;
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
if (ret)
return ret;
- return op.u.apic_op.value;
+ return apic_op.value;
}
static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
- physdev_op_t op;
+ struct physdev_apic apic_op;
- op.cmd = PHYSDEVOP_APIC_WRITE;
- op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
- op.u.apic_op.reg = reg;
- op.u.apic_op.value = value;
- HYPERVISOR_physdev_op(&op);
+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
+ apic_op.reg = reg;
+ apic_op.value = value;
+ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
}
#define io_apic_read(a,r) xen_io_apic_read(a,r)
int assign_irq_vector(int irq)
{
- physdev_op_t op;
+ struct physdev_irq irq_op;
BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
return IO_APIC_VECTOR(irq);
- op.cmd = PHYSDEVOP_ASSIGN_VECTOR;
- op.u.irq_op.irq = irq;
- if (HYPERVISOR_physdev_op(&op))
+ irq_op.irq = irq;
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
return -ENOSPC;
- vector_irq[op.u.irq_op.vector] = irq;
+ vector_irq[irq_op.vector] = irq;
if (irq != AUTO_ASSIGN)
- IO_APIC_VECTOR(irq) = op.u.irq_op.vector;
+ IO_APIC_VECTOR(irq) = irq_op.vector;
- return op.u.irq_op.vector;
+ return irq_op.vector;
}
extern void (*interrupt[NR_IRQS])(void);
asmlinkage long sys_iopl(unsigned int new_iopl, struct pt_regs *regs)
{
- unsigned int old_iopl = current->thread.iopl;
- physdev_op_t op;
+ unsigned int old_iopl = current->thread.iopl;
+ struct physdev_set_iopl set_iopl;
if (new_iopl > 3)
return -EINVAL;
current->thread.iopl = new_iopl;
/* Force the change at ring 0. */
- op.cmd = PHYSDEVOP_SET_IOPL;
- op.u.set_iopl.iopl = (new_iopl == 0) ? 1 : new_iopl;
- HYPERVISOR_physdev_op(&op);
+ set_iopl.iopl = (new_iopl == 0) ? 1 : new_iopl;
+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
return 0;
}
struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
#endif
#ifdef CONFIG_XEN
- static physdev_op_t iobmp_op = {
- .cmd = PHYSDEVOP_SET_IOBITMAP
- };
+ struct physdev_set_iobitmap iobmp_op = { 0 };
#endif
kfree(t->io_bitmap_ptr);
put_cpu();
#endif
#ifdef CONFIG_XEN
- HYPERVISOR_physdev_op(&iobmp_op);
+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobmp_op);
#endif
t->io_bitmap_max = 0;
}
#ifndef CONFIG_X86_NO_TSS
struct tss_struct *tss = &per_cpu(init_tss, cpu);
#endif
- physdev_op_t iopl_op, iobmp_op;
+ struct physdev_set_iopl iopl_op;
+ struct physdev_set_iobitmap iobmp_op;
multicall_entry_t _mcl[8], *mcl = _mcl;
/*
#undef C
if (unlikely(prev->iopl != next->iopl)) {
- iopl_op.cmd = PHYSDEVOP_SET_IOPL;
- iopl_op.u.set_iopl.iopl = (next->iopl == 0) ? 1 : next->iopl;
+ iopl_op.iopl = (next->iopl == 0) ? 1 : next->iopl;
mcl->op = __HYPERVISOR_physdev_op;
- mcl->args[0] = (unsigned long)&iopl_op;
+ mcl->args[0] = PHYSDEVOP_set_iopl;
+ mcl->args[1] = (unsigned long)&iopl_op;
mcl++;
}
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
- iobmp_op.cmd =
- PHYSDEVOP_SET_IOBITMAP;
- iobmp_op.u.set_iobitmap.bitmap =
- (char *)next->io_bitmap_ptr;
- iobmp_op.u.set_iobitmap.nr_ports =
- next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
+ iobmp_op.bitmap = (char *)next->io_bitmap_ptr;
+ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
mcl->op = __HYPERVISOR_physdev_op;
- mcl->args[0] = (unsigned long)&iobmp_op;
+ mcl->args[0] = PHYSDEVOP_set_iobitmap;
+ mcl->args[1] = (unsigned long)&iobmp_op;
mcl++;
}
#ifdef CONFIG_XEN
{
- physdev_op_t op;
+ struct physdev_set_iopl set_iopl;
- op.cmd = PHYSDEVOP_SET_IOPL;
- op.u.set_iopl.iopl = 1;
- HYPERVISOR_physdev_op(&op);
+ set_iopl.iopl = 1;
+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
if (xen_start_info->flags & SIF_INITDOMAIN) {
if (!(xen_start_info->flags & SIF_PRIVILEGED))
if XEN
config XEN_INTERFACE_VERSION
hex
- default 0x00030201
+ default 0x00030202
menu "XEN"
{
blkif_sring_t *sring;
int err;
- evtchn_op_t op = {
- .cmd = EVTCHNOP_bind_interdomain,
- .u.bind_interdomain.remote_dom = blkif->domid,
- .u.bind_interdomain.remote_port = evtchn };
+ struct evtchn_bind_interdomain bind_interdomain;
/* Already connected through? */
if (blkif->irq)
return err;
}
- err = HYPERVISOR_event_channel_op(&op);
+ bind_interdomain.remote_dom = blkif->domid;
+ bind_interdomain.remote_port = evtchn;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ &bind_interdomain);
if (err) {
unmap_frontend_page(blkif);
free_vm_area(blkif->blk_ring_area);
return err;
}
- blkif->evtchn = op.u.bind_interdomain.local_port;
+ blkif->evtchn = bind_interdomain.local_port;
sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
{
blkif_sring_t *sring;
int err;
- evtchn_op_t op = {
- .cmd = EVTCHNOP_bind_interdomain,
- .u.bind_interdomain.remote_dom = blkif->domid,
- .u.bind_interdomain.remote_port = evtchn };
+ struct evtchn_bind_interdomain bind_interdomain;
if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
return -ENOMEM;
return err;
}
- err = HYPERVISOR_event_channel_op(&op);
+ bind_interdomain.remote_dom = blkif->domid;
+ bind_interdomain.remote_port = evtchn;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ &bind_interdomain);
if (err) {
unmap_frontend_page(blkif);
free_vm_area(blkif->blk_ring_area);
return err;
}
- blkif->evtchn = op.u.bind_interdomain.local_port;
+ blkif->evtchn = bind_interdomain.local_port;
sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
static int irq_bindcount[NR_IRQS];
/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
-static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
+static unsigned long pirq_needs_eoi[NR_PIRQS/sizeof(unsigned long)];
#ifdef CONFIG_SMP
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
- evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };
+ struct evtchn_bind_virq bind_virq;
int evtchn, irq;
spin_lock(&irq_mapping_update_lock);
if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
- op.u.bind_virq.virq = virq;
- op.u.bind_virq.vcpu = cpu;
- BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
- evtchn = op.u.bind_virq.port;
+ bind_virq.virq = virq;
+ bind_virq.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq) != 0)
+ BUG();
+ evtchn = bind_virq.port;
irq = find_unbound_irq();
evtchn_to_irq[evtchn] = irq;
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
- evtchn_op_t op = { .cmd = EVTCHNOP_bind_ipi };
+ struct evtchn_bind_ipi bind_ipi;
int evtchn, irq;
spin_lock(&irq_mapping_update_lock);
if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
- op.u.bind_ipi.vcpu = cpu;
- BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
- evtchn = op.u.bind_ipi.port;
+ bind_ipi.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+ &bind_ipi) != 0)
+ BUG();
+ evtchn = bind_ipi.port;
irq = find_unbound_irq();
evtchn_to_irq[evtchn] = irq;
static void unbind_from_irq(unsigned int irq)
{
- evtchn_op_t op = { .cmd = EVTCHNOP_close };
+ struct evtchn_close close;
int evtchn = evtchn_from_irq(irq);
spin_lock(&irq_mapping_update_lock);
if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
- op.u.close.port = evtchn;
- BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+ close.port = evtchn;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
switch (type_from_irq(irq)) {
case IRQT_VIRQ:
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
- evtchn_op_t op = { .cmd = EVTCHNOP_bind_vcpu };
+ struct evtchn_bind_vcpu bind_vcpu;
int evtchn;
spin_lock(&irq_mapping_update_lock);
}
/* Send future instances of this interrupt to other vcpu. */
- op.u.bind_vcpu.port = evtchn;
- op.u.bind_vcpu.vcpu = tcpu;
+ bind_vcpu.port = evtchn;
+ bind_vcpu.vcpu = tcpu;
/*
* If this fails, it usually just indicates that we're dealing with a
* virq or IPI channel, which don't actually need to be rebound. Ignore
* it, but don't do the xenlinux-level rebind in that case.
*/
- if (HYPERVISOR_event_channel_op(&op) >= 0)
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
bind_evtchn_to_cpu(evtchn, tcpu);
spin_unlock(&irq_mapping_update_lock);
static inline void pirq_unmask_notify(int pirq)
{
- physdev_op_t op;
- if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
- op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
- (void)HYPERVISOR_physdev_op(&op);
- }
+ struct physdev_eoi eoi = { .irq = pirq };
+ if (unlikely(test_bit(pirq, &pirq_needs_eoi[0])))
+ (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
}
static inline void pirq_query_unmask(int pirq)
{
- physdev_op_t op;
- op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
- op.u.irq_status_query.irq = pirq;
- (void)HYPERVISOR_physdev_op(&op);
- clear_bit(pirq, &pirq_needs_unmask_notify[0]);
- if (op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
- set_bit(pirq, &pirq_needs_unmask_notify[0]);
+ struct physdev_irq_status_query irq_status;
+ irq_status.irq = pirq;
+ (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
+ clear_bit(pirq, &pirq_needs_eoi[0]);
+ if (irq_status.flags & XENIRQSTAT_needs_eoi)
+ set_bit(pirq, &pirq_needs_eoi[0]);
}
/*
static unsigned int startup_pirq(unsigned int irq)
{
- evtchn_op_t op = { .cmd = EVTCHNOP_bind_pirq };
+ struct evtchn_bind_pirq bind_pirq;
int evtchn = evtchn_from_irq(irq);
if (VALID_EVTCHN(evtchn))
goto out;
- op.u.bind_pirq.pirq = irq;
+ bind_pirq.pirq = irq;
/* NB. We are happy to share unless we are probing. */
- op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
- if (HYPERVISOR_event_channel_op(&op) != 0) {
+ bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
if (!probing_irq(irq))
printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
irq);
return 0;
}
- evtchn = op.u.bind_pirq.port;
+ evtchn = bind_pirq.port;
pirq_query_unmask(irq_to_pirq(irq));
static void shutdown_pirq(unsigned int irq)
{
- evtchn_op_t op = { .cmd = EVTCHNOP_close };
+ struct evtchn_close close;
int evtchn = evtchn_from_irq(irq);
if (!VALID_EVTCHN(evtchn))
mask_evtchn(evtchn);
- op.u.close.port = evtchn;
- BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+ close.port = evtchn;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
bind_evtchn_to_cpu(evtchn, 0);
evtchn_to_irq[evtchn] = -1;
/* Slow path (hypercall) if this is a non-local port. */
if (unlikely(cpu != cpu_from_evtchn(port))) {
- evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
- .u.unmask.port = port };
- (void)HYPERVISOR_event_channel_op(&op);
+ struct evtchn_unmask unmask = { .port = port };
+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
return;
}
void irq_resume(void)
{
- evtchn_op_t op;
- int cpu, pirq, virq, ipi, irq, evtchn;
+ struct evtchn_bind_virq bind_virq;
+ struct evtchn_bind_ipi bind_ipi;
+ int cpu, pirq, virq, ipi, irq, evtchn;
init_evtchn_cpu_bindings();
BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
/* Get a new binding from Xen. */
- memset(&op, 0, sizeof(op));
- op.cmd = EVTCHNOP_bind_virq;
- op.u.bind_virq.virq = virq;
- op.u.bind_virq.vcpu = 0;
- BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
- evtchn = op.u.bind_virq.port;
+ bind_virq.virq = virq;
+ bind_virq.vcpu = 0;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq) != 0)
+ BUG();
+ evtchn = bind_virq.port;
/* Record the new mapping. */
evtchn_to_irq[evtchn] = irq;
BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
/* Get a new binding from Xen. */
- memset(&op, 0, sizeof(op));
- op.cmd = EVTCHNOP_bind_ipi;
- op.u.bind_ipi.vcpu = 0;
- BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
- evtchn = op.u.bind_ipi.port;
+ bind_ipi.vcpu = 0;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+ &bind_ipi) != 0)
+ BUG();
+ evtchn = bind_ipi.port;
/* Record the new mapping. */
evtchn_to_irq[evtchn] = irq;
int rc;
struct per_user_data *u = file->private_data;
void __user *uarg = (void __user *) arg;
- evtchn_op_t op = { 0 };
switch (cmd) {
case IOCTL_EVTCHN_BIND_VIRQ: {
struct ioctl_evtchn_bind_virq bind;
+ struct evtchn_bind_virq bind_virq;
rc = -EFAULT;
if (copy_from_user(&bind, uarg, sizeof(bind)))
break;
- op.cmd = EVTCHNOP_bind_virq;
- op.u.bind_virq.virq = bind.virq;
- op.u.bind_virq.vcpu = 0;
- rc = HYPERVISOR_event_channel_op(&op);
+ bind_virq.virq = bind.virq;
+ bind_virq.vcpu = 0;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq);
if (rc != 0)
break;
- rc = op.u.bind_virq.port;
+ rc = bind_virq.port;
evtchn_bind_to_user(u, rc);
break;
}
case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
struct ioctl_evtchn_bind_interdomain bind;
+ struct evtchn_bind_interdomain bind_interdomain;
rc = -EFAULT;
if (copy_from_user(&bind, uarg, sizeof(bind)))
break;
- op.cmd = EVTCHNOP_bind_interdomain;
- op.u.bind_interdomain.remote_dom = bind.remote_domain;
- op.u.bind_interdomain.remote_port = bind.remote_port;
- rc = HYPERVISOR_event_channel_op(&op);
+ bind_interdomain.remote_dom = bind.remote_domain;
+ bind_interdomain.remote_port = bind.remote_port;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ &bind_interdomain);
if (rc != 0)
break;
- rc = op.u.bind_interdomain.local_port;
+ rc = bind_interdomain.local_port;
evtchn_bind_to_user(u, rc);
break;
}
case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
struct ioctl_evtchn_bind_unbound_port bind;
+ struct evtchn_alloc_unbound alloc_unbound;
rc = -EFAULT;
if (copy_from_user(&bind, uarg, sizeof(bind)))
break;
- op.cmd = EVTCHNOP_alloc_unbound;
- op.u.alloc_unbound.dom = DOMID_SELF;
- op.u.alloc_unbound.remote_dom = bind.remote_domain;
- rc = HYPERVISOR_event_channel_op(&op);
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = bind.remote_domain;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
if (rc != 0)
break;
- rc = op.u.alloc_unbound.port;
+ rc = alloc_unbound.port;
evtchn_bind_to_user(u, rc);
break;
}
case IOCTL_EVTCHN_UNBIND: {
struct ioctl_evtchn_unbind unbind;
+ struct evtchn_close close;
int ret;
rc = -EFAULT;
spin_unlock_irq(&port_user_lock);
- op.cmd = EVTCHNOP_close;
- op.u.close.port = unbind.port;
- ret = HYPERVISOR_event_channel_op(&op);
+ close.port = unbind.port;
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
BUG_ON(ret);
rc = 0;
{
int i;
struct per_user_data *u = filp->private_data;
- evtchn_op_t op = { 0 };
+ struct evtchn_close close;
spin_lock_irq(&port_user_lock);
port_user[i] = NULL;
mask_evtchn(i);
- op.cmd = EVTCHNOP_close;
- op.u.close.port = i;
- ret = HYPERVISOR_event_channel_op(&op);
+ close.port = i;
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
BUG_ON(ret);
}
int err = -ENOMEM;
netif_tx_sring_t *txs;
netif_rx_sring_t *rxs;
- evtchn_op_t op = {
- .cmd = EVTCHNOP_bind_interdomain,
- .u.bind_interdomain.remote_dom = netif->domid,
- .u.bind_interdomain.remote_port = evtchn };
+ struct evtchn_bind_interdomain bind_interdomain;
/* Already connected through? */
if (netif->irq)
if (err)
goto err_map;
- err = HYPERVISOR_event_channel_op(&op);
+ bind_interdomain.remote_dom = netif->domid;
+ bind_interdomain.remote_port = evtchn;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ &bind_interdomain);
if (err)
goto err_hypervisor;
- netif->evtchn = op.u.bind_interdomain.local_port;
+ netif->evtchn = bind_interdomain.local_port;
netif->irq = bind_evtchn_to_irqhandler(
netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
static struct proc_dir_entry *privcmd_intf;
static struct proc_dir_entry *capabilities_intf;
-#define NR_HYPERCALLS 32
+#define NR_HYPERCALLS 64
static DECLARE_BITMAP(hypercall_permission_map, NR_HYPERCALLS);
static int privcmd_ioctl(struct inode *inode, struct file *file,
int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
{
int err;
- evtchn_op_t op = {
- .cmd = EVTCHNOP_bind_interdomain,
- .u.bind_interdomain.remote_dom = tpmif->domid,
- .u.bind_interdomain.remote_port = evtchn,
- };
+ struct evtchn_bind_interdomain bind_interdomain;
if (tpmif->irq) {
return 0;
return err;
}
- err = HYPERVISOR_event_channel_op(&op);
+
+ bind_interdomain.remote_dom = tpmif->domid;
+ bind_interdomain.remote_port = evtchn;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ &bind_interdomain);
if (err) {
unmap_frontend_page(tpmif);
free_vm_area(tpmif->tx_area);
return err;
}
- tpmif->evtchn = op.u.bind_interdomain.local_port;
+ tpmif->evtchn = bind_interdomain.local_port;
tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
- evtchn_op_t op = {
- .cmd = EVTCHNOP_alloc_unbound,
- .u.alloc_unbound.dom = DOMID_SELF,
- .u.alloc_unbound.remote_dom = dev->otherend_id
- };
- int err = HYPERVISOR_event_channel_op(&op);
+ struct evtchn_alloc_unbound alloc_unbound;
+ int err;
+
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = dev->otherend_id;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
if (err)
xenbus_dev_fatal(dev, err, "allocating event channel");
else
- *port = op.u.alloc_unbound.port;
+ *port = alloc_unbound.port;
+
return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
- evtchn_op_t op = {
- .cmd = EVTCHNOP_bind_interdomain,
- .u.bind_interdomain.remote_dom = dev->otherend_id,
- .u.bind_interdomain.remote_port = remote_port,
- };
- int err = HYPERVISOR_event_channel_op(&op);
+ struct evtchn_bind_interdomain bind_interdomain;
+ int err;
+
+ bind_interdomain.remote_dom = dev->otherend_id;
+ bind_interdomain.remote_port = remote_port;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ &bind_interdomain);
if (err)
xenbus_dev_fatal(dev, err,
"binding to event channel %d from domain %d",
remote_port, dev->otherend_id);
else
- *port = op.u.bind_interdomain.local_port;
+ *port = bind_interdomain.local_port;
+
return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
- evtchn_op_t op = {
- .cmd = EVTCHNOP_close,
- .u.close.port = port,
- };
- int err = HYPERVISOR_event_channel_op(&op);
+ struct evtchn_close close;
+ int err;
+
+ close.port = port;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
if (err)
xenbus_dev_error(dev, err, "freeing event channel %d", port);
+
return err;
}
dom0 = (xen_start_info->store_evtchn == 0);
if (dom0) {
- evtchn_op_t op = { 0 };
+ struct evtchn_alloc_unbound alloc_unbound;
/* Allocate page. */
page = get_zeroed_page(GFP_KERNEL);
PAGE_SHIFT);
/* Next allocate a local port which xenstored can bind to */
- op.cmd = EVTCHNOP_alloc_unbound;
- op.u.alloc_unbound.dom = DOMID_SELF;
- op.u.alloc_unbound.remote_dom = 0;
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = 0;
- err = HYPERVISOR_event_channel_op(&op);
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
if (err == -ENOSYS)
goto err;
BUG_ON(err);
- xen_start_info->store_evtchn = op.u.alloc_unbound.port;
+ xen_start_info->store_evtchn = alloc_unbound.port;
/* And finally publish the above info in /proc/xen */
xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600);
static inline int
HYPERVISOR_event_channel_op(
- void *op)
+ int cmd, void *arg)
{
- return _hypercall1(int, event_channel_op, op);
+ int rc = _hypercall2(int, event_channel_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS)) {
+ struct evtchn_op op;
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, event_channel_op_compat, &op);
+ }
+ return rc;
}
static inline int
static inline int
HYPERVISOR_physdev_op(
- void *physdev_op)
+ int cmd, void *arg)
{
- return _hypercall1(int, physdev_op, physdev_op);
+ int rc = _hypercall2(int, physdev_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS)) {
+ struct physdev_op op;
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, physdev_op_compat, &op);
+ }
+ return rc;
}
static inline int
#include <linux/errno.h>
#include <xen/interface/xen.h>
#include <xen/interface/dom0_ops.h>
+#include <xen/interface/event_channel.h>
+#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/nmi.h>
#include <asm/ptrace.h>
*/
static inline void set_iopl_mask(unsigned mask)
{
- physdev_op_t op;
+ struct physdev_set_iopl set_iopl;
/* Force the change at ring 0. */
- op.cmd = PHYSDEVOP_SET_IOPL;
- op.u.set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
- HYPERVISOR_physdev_op(&op);
+ set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}
/* Forward declaration, a strange C thing */
static inline int
HYPERVISOR_event_channel_op(
- void *op)
+ int cmd, void *arg)
{
- return _hypercall1(int, event_channel_op, op);
+ int rc = _hypercall2(int, event_channel_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS)) {
+ struct evtchn_op op;
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, event_channel_op_compat, &op);
+ }
+ return rc;
}
static inline int
static inline int
HYPERVISOR_physdev_op(
- void *physdev_op)
+ int cmd, void *arg)
{
- return _hypercall1(int, physdev_op, physdev_op);
+ int rc = _hypercall2(int, physdev_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS)) {
+ struct physdev_op op;
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, physdev_op_compat, &op);
+ }
+ return rc;
}
static inline int
#include <linux/errno.h>
#include <xen/interface/xen.h>
#include <xen/interface/dom0_ops.h>
+#include <xen/interface/event_channel.h>
+#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <asm/hypercall.h>
#include <asm/ptrace.h>
static inline int
HYPERVISOR_event_channel_op(
- void *op)
+ int cmd, void *arg)
{
- return _hypercall1(int, event_channel_op, op);
+ int rc = _hypercall2(int, event_channel_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS)) {
+ struct evtchn_op op;
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, event_channel_op_compat, &op);
+ }
+ return rc;
}
static inline int
static inline int
HYPERVISOR_physdev_op(
- void *physdev_op)
+ int cmd, void *arg)
{
- return _hypercall1(int, physdev_op, physdev_op);
+ int rc = _hypercall2(int, physdev_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS)) {
+ struct physdev_op op;
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, physdev_op_compat, &op);
+ }
+ return rc;
}
static inline int
static inline void notify_remote_via_evtchn(int port)
{
- evtchn_op_t op;
- op.cmd = EVTCHNOP_send,
- op.u.send.port = port;
- (void)HYPERVISOR_event_channel_op(&op);
+ struct evtchn_send send = { .port = port };
+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}
/*
#include "xc_private.h"
-static int do_evtchn_op(int xc_handle, evtchn_op_t *op)
+static int do_evtchn_op(int xc_handle, int cmd, void *arg, size_t arg_size)
{
int ret = -1;
DECLARE_HYPERCALL;
hypercall.op = __HYPERVISOR_event_channel_op;
- hypercall.arg[0] = (unsigned long)op;
+ hypercall.arg[0] = cmd;
+ hypercall.arg[1] = (unsigned long)arg;
- if ( mlock(op, sizeof(*op)) != 0 )
+ if ( mlock(arg, arg_size) != 0 )
{
- PERROR("do_evtchn_op: op mlock failed");
+ PERROR("do_evtchn_op: arg mlock failed");
goto out;
}
if ((ret = do_xen_hypercall(xc_handle, &hypercall)) < 0)
ERROR("do_evtchn_op: HYPERVISOR_event_channel_op failed: %d", ret);
- safe_munlock(op, sizeof(*op));
+ safe_munlock(arg, arg_size);
out:
return ret;
}
uint32_t remote_dom)
{
int rc;
- evtchn_op_t op = {
- .cmd = EVTCHNOP_alloc_unbound,
- .u.alloc_unbound.dom = (domid_t)dom,
- .u.alloc_unbound.remote_dom = (domid_t)remote_dom };
+ struct evtchn_alloc_unbound arg = {
+ .dom = (domid_t)dom,
+ .remote_dom = (domid_t)remote_dom
+ };
- if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
- rc = op.u.alloc_unbound.port;
+ rc = do_evtchn_op(xc_handle, EVTCHNOP_alloc_unbound, &arg, sizeof(arg));
+ if ( rc == 0 )
+ rc = arg.port;
return rc;
}
evtchn_port_t port,
xc_evtchn_status_t *status)
{
- int rc;
- evtchn_op_t op = {
- .cmd = EVTCHNOP_status,
- .u.status.dom = (domid_t)dom,
- .u.status.port = port };
-
- if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
- memcpy(status, &op.u.status, sizeof(*status));
-
- return rc;
+ status->dom = (domid_t)dom;
+ status->port = port;
+ return do_evtchn_op(xc_handle, EVTCHNOP_status, status, sizeof(*status));
}
vmx_vcpu_increment_iip(vcpu);
}
-void hyper_event_channel_op(void)
+void hyper_event_channel_op_compat(void)
{
VCPU *vcpu=current;
u64 r32,ret;
vcpu_get_gr_nat(vcpu,16,&r32);
- ret=do_event_channel_op(guest_handle_from_ptr(r32, evtchn_op_t));
+ ret=do_event_channel_op_compat(guest_handle_from_ptr(r32, evtchn_op_t));
vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
data8 hyper_not_support //hyper_multicall
data8 hyper_not_support //hyper_update_va_mapping
data8 hyper_not_support //hyper_set_timer_op /* 15 */
- data8 hyper_event_channel_op
+ data8 hyper_event_channel_op_compat
data8 hyper_xen_version
data8 hyper_not_support //hyper_console_io
data8 hyper_not_support //hyper_physdev_op
#include <xen/domain.h>
extern unsigned long translate_domain_mpaddr(unsigned long);
+static long do_physdev_op_compat(int cmd, XEN_GUEST_HANDLE(void) arg);
static long do_physdev_op(XEN_GUEST_HANDLE(physdev_op_t) uop);
/* FIXME: where these declarations should be there ? */
extern int dump_privop_counts_to_user(char *, int);
(hypercall_t)do_multicall,
(hypercall_t)do_ni_hypercall, /* do_update_va_mapping */
(hypercall_t)do_ni_hypercall, /* do_set_timer_op */ /* 15 */
- (hypercall_t)do_event_channel_op,
+ (hypercall_t)do_event_channel_op_compat,
(hypercall_t)do_xen_version,
(hypercall_t)do_console_io,
- (hypercall_t)do_physdev_op,
+ (hypercall_t)do_physdev_op_compat,
(hypercall_t)do_grant_table_op, /* 20 */
(hypercall_t)do_ni_hypercall, /* do_vm_assist */
(hypercall_t)do_ni_hypercall, /* do_update_va_mapping_otherdomain */
(hypercall_t)do_ni_hypercall, /* do_nmi_op */
(hypercall_t)do_sched_op,
(hypercall_t)do_ni_hypercall, /* */ /* 30 */
- (hypercall_t)do_ni_hypercall /* */
+ (hypercall_t)do_ni_hypercall, /* */
+ (hypercall_t)do_event_channel_op,
+ (hypercall_t)do_physdev_op
};
uint32_t nr_hypercalls =
iosapic_guest_write(
unsigned long physbase, unsigned int reg, u32 pval);
-static long do_physdev_op(XEN_GUEST_HANDLE(physdev_op_t) uop)
+static long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
- struct physdev_op op;
+ int irq;
long ret;
- int irq;
-
- if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
- return -EFAULT;
- switch ( op.cmd )
+ switch ( cmd )
{
- case PHYSDEVOP_IRQ_UNMASK_NOTIFY:
+ case PHYSDEVOP_eoi: {
+ struct physdev_eoi eoi;
+ ret = -EFAULT;
+ if ( copy_from_guest(&eoi, arg, 1) != 0 )
+ break;
+ ret = pirq_guest_eoi(current->domain, eoi.irq);
+ break;
+ }
+
+ /* Legacy since 0x00030202. */
+ case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
ret = pirq_guest_unmask(current->domain);
break;
+ }
- case PHYSDEVOP_IRQ_STATUS_QUERY:
- irq = op.u.irq_status_query.irq;
+ case PHYSDEVOP_irq_status_query: {
+ struct physdev_irq_status_query irq_status_query;
+ ret = -EFAULT;
+ if ( copy_from_guest(&irq_status_query, arg, 1) != 0 )
+ break;
+ irq = irq_status_query.irq;
ret = -EINVAL;
if ( (irq < 0) || (irq >= NR_IRQS) )
break;
- op.u.irq_status_query.flags = 0;
+ irq_status_query.flags = 0;
/* Edge-triggered interrupts don't need an explicit unmask downcall. */
if ( !strstr(irq_desc[irq_to_vector(irq)].handler->typename, "edge") )
- op.u.irq_status_query.flags |= PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY;
- ret = 0;
+ irq_status_query.flags |= XENIRQSTAT_needs_eoi;
+ ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
break;
+ }
- case PHYSDEVOP_APIC_READ:
+ case PHYSDEVOP_apic_read: {
+ struct physdev_apic apic;
+ ret = -EFAULT;
+ if ( copy_from_guest(&apic, arg, 1) != 0 )
+ break;
ret = -EPERM;
if ( !IS_PRIV(current->domain) )
break;
- ret = iosapic_guest_read(
- op.u.apic_op.apic_physbase,
- op.u.apic_op.reg,
- &op.u.apic_op.value);
+ ret = iosapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
+ if ( copy_to_guest(arg, &apic, 1) != 0 )
+ ret = -EFAULT;
break;
+ }
- case PHYSDEVOP_APIC_WRITE:
+ case PHYSDEVOP_apic_write: {
+ struct physdev_apic apic;
+ ret = -EFAULT;
+ if ( copy_from_guest(&apic, arg, 1) != 0 )
+ break;
ret = -EPERM;
if ( !IS_PRIV(current->domain) )
break;
- ret = iosapic_guest_write(
- op.u.apic_op.apic_physbase,
- op.u.apic_op.reg,
- op.u.apic_op.value);
+ ret = iosapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
break;
+ }
- case PHYSDEVOP_ASSIGN_VECTOR:
+ case PHYSDEVOP_alloc_irq_vector: {
+ struct physdev_irq irq_op;
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&irq_op, arg, 1) != 0 )
+ break;
+
+ ret = -EPERM;
if ( !IS_PRIV(current->domain) )
- return -EPERM;
+ break;
- if ( (irq = op.u.irq_op.irq) >= NR_IRQS )
- return -EINVAL;
+ ret = -EINVAL;
+ if ( (irq = irq_op.irq) >= NR_IRQS )
+ break;
- op.u.irq_op.vector = assign_irq_vector(irq);
- ret = 0;
+ irq_op.vector = assign_irq_vector(irq);
+ ret = copy_to_guest(arg, &irq_op, 1) ? -EFAULT : 0;
break;
+ }
default:
ret = -EINVAL;
break;
}
- if ( copy_to_guest(uop, &op, 1) )
- ret = -EFAULT;
-
return ret;
}
+
+/* Legacy hypercall (as of 0x00030202). */
+static long do_physdev_op_compat(XEN_GUEST_HANDLE(physdev_op_t) uop)
+{
+ struct physdev_op op;
+
+ if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
+ return -EFAULT;
+
+ return do_physdev_op(op.cmd, guest_handle_from_ptr(&uop.p->u, void));
+}
+
+/* Legacy hypercall (as of 0x00030202). */
+long do_event_channel_op_compat(XEN_GUEST_HANDLE(evtchn_op_t) uop)
+{
+ struct evtchn_op op;
+
+ if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
+ return -EFAULT;
+
+ return do_event_channel_op(op.cmd, guest_handle_from_ptr(&uop.p->u, void));
+}
}
}
+int pirq_guest_eoi(struct domain *d, int irq)
+{
+ irq_desc_t *desc;
+
+ if ( (irq < 0) || (irq >= NR_IRQS) )
+ return -EINVAL;
+
+ desc = &irq_desc[irq];
+ spin_lock_irq(&desc->lock);
+ if ( test_and_clear_bit(irq, &d->pirq_mask) &&
+ (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
+ desc->handler->end(irq);
+ spin_unlock_irq(&desc->lock);
+
+ return 0;
+
+}
+
int pirq_guest_unmask(struct domain *d)
{
irq_desc_t *desc;
- int pirq;
+ int irq;
shared_info_t *s = d->shared_info;
- for ( pirq = find_first_bit(d->pirq_mask, NR_PIRQS);
- pirq < NR_PIRQS;
- pirq = find_next_bit(d->pirq_mask, NR_PIRQS, pirq+1) )
+ for ( irq = find_first_bit(d->pirq_mask, NR_PIRQS);
+ irq < NR_PIRQS;
+ irq = find_next_bit(d->pirq_mask, NR_PIRQS, irq+1) )
{
- desc = &irq_desc[pirq];
+ desc = &irq_desc[irq];
spin_lock_irq(&desc->lock);
- if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
- test_and_clear_bit(pirq, &d->pirq_mask) &&
+ if ( !test_bit(d->pirq_to_evtchn[irq], &s->evtchn_mask[0]) &&
+ test_and_clear_bit(irq, &d->pirq_mask) &&
(--((irq_guest_action_t *)desc->action)->in_flight == 0) )
- desc->handler->end(pirq);
+ desc->handler->end(irq);
spin_unlock_irq(&desc->lock);
}
obj-y += apic.o
obj-y += audit.o
obj-y += bitops.o
+obj-y += compat.o
obj-y += delay.o
obj-y += dmi_scan.o
obj-y += dom0_ops.o
--- /dev/null
+/******************************************************************************
+ * compat.c
+ *
+ * Implementations of legacy hypercalls. These call through to the new
+ * hypercall after doing necessary argument munging.
+ */
+
+#include <xen/config.h>
+#include <xen/guest_access.h>
+#include <xen/hypercall.h>
+
+/* Legacy hypercall (as of 0x00030202). */
+long do_physdev_op_compat(XEN_GUEST_HANDLE(physdev_op_t) uop)
+{
+ struct physdev_op op;
+
+ if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
+ return -EFAULT;
+
+ return do_physdev_op(op.cmd, (XEN_GUEST_HANDLE(void)) { &uop.p->u });
+}
+
+/* Legacy hypercall (as of 0x00030202). */
+long do_event_channel_op_compat(XEN_GUEST_HANDLE(evtchn_op_t) uop)
+{
+ struct evtchn_op op;
+
+ if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
+ return -EFAULT;
+
+ return do_event_channel_op(op.cmd, (XEN_GUEST_HANDLE(void)) {&uop.p->u });
+}
flush_ready_eoi(NULL);
}
-int pirq_guest_unmask(struct domain *d)
+static void __pirq_guest_eoi(struct domain *d, int irq)
{
irq_desc_t *desc;
irq_guest_action_t *action;
- cpumask_t cpu_eoi_map = CPU_MASK_NONE;
- unsigned int pirq, cpu = smp_processor_id();
- shared_info_t *s = d->shared_info;
+ cpumask_t cpu_eoi_map;
+
+ desc = &irq_desc[irq_to_vector(irq)];
+ action = (irq_guest_action_t *)desc->action;
+
+ spin_lock_irq(&desc->lock);
+
+ ASSERT(!test_bit(irq, d->pirq_mask) ||
+ (action->ack_type != ACKTYPE_NONE));
- for ( pirq = find_first_bit(d->pirq_mask, NR_PIRQS);
- pirq < NR_PIRQS;
- pirq = find_next_bit(d->pirq_mask, NR_PIRQS, pirq+1) )
+ if ( unlikely(!test_and_clear_bit(irq, d->pirq_mask)) ||
+ unlikely(--action->in_flight != 0) )
{
- desc = &irq_desc[irq_to_vector(pirq)];
- action = (irq_guest_action_t *)desc->action;
+ spin_unlock_irq(&desc->lock);
+ return;
+ }
- spin_lock_irq(&desc->lock);
+ if ( action->ack_type == ACKTYPE_UNMASK )
+ {
+ ASSERT(cpus_empty(action->cpu_eoi_map));
+ desc->handler->end(irq_to_vector(irq));
+ /* Must drop desc->lock (and re-enable IRQs) on every exit path. */
+ spin_unlock_irq(&desc->lock);
+ return;
+ }
- if ( !test_bit(d->pirq_to_evtchn[pirq], s->evtchn_mask) &&
- test_and_clear_bit(pirq, d->pirq_mask) )
- {
- ASSERT(action->ack_type != ACKTYPE_NONE);
- if ( --action->in_flight == 0 )
- {
- if ( action->ack_type == ACKTYPE_UNMASK )
- desc->handler->end(irq_to_vector(pirq));
- cpu_eoi_map = action->cpu_eoi_map;
- }
- }
+ ASSERT(action->ack_type == ACKTYPE_EOI);
+
+ cpu_eoi_map = action->cpu_eoi_map;
- if ( cpu_test_and_clear(cpu, cpu_eoi_map) )
- {
- __set_eoi_ready(desc);
- spin_unlock(&desc->lock);
- flush_ready_eoi(NULL);
- local_irq_enable();
- }
- else
- {
- spin_unlock_irq(&desc->lock);
- }
+ if ( cpu_test_and_clear(smp_processor_id(), cpu_eoi_map) )
+ {
+ __set_eoi_ready(desc);
+ spin_unlock(&desc->lock);
+ flush_ready_eoi(NULL);
+ local_irq_enable();
+ }
+ else
+ {
+ spin_unlock_irq(&desc->lock);
+ }
- if ( !cpus_empty(cpu_eoi_map) )
- {
- on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
- cpu_eoi_map = CPU_MASK_NONE;
- }
+ if ( !cpus_empty(cpu_eoi_map) )
+ on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
+}
+
+int pirq_guest_eoi(struct domain *d, int irq)
+{
+ if ( (irq < 0) || (irq >= NR_IRQS) )
+ return -EINVAL;
+
+ __pirq_guest_eoi(d, irq);
+
+ return 0;
+}
+
+int pirq_guest_unmask(struct domain *d)
+{
+ unsigned int irq;
+ shared_info_t *s = d->shared_info;
+
+ for ( irq = find_first_bit(d->pirq_mask, NR_PIRQS);
+ irq < NR_PIRQS;
+ irq = find_next_bit(d->pirq_mask, NR_PIRQS, irq+1) )
+ {
+ if ( !test_bit(d->pirq_to_evtchn[irq], s->evtchn_mask) )
+ __pirq_guest_eoi(d, irq);
}
return 0;
pirq_acktype(
int irq);
-/*
- * Demuxing hypercall.
- */
-long do_physdev_op(XEN_GUEST_HANDLE(physdev_op_t) uop)
+long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
- struct physdev_op op;
+ int irq;
long ret;
- int irq;
-
- if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
- return -EFAULT;
- switch ( op.cmd )
+ switch ( cmd )
{
- case PHYSDEVOP_IRQ_UNMASK_NOTIFY:
+ case PHYSDEVOP_eoi: {
+ struct physdev_eoi eoi;
+ ret = -EFAULT;
+ if ( copy_from_guest(&eoi, arg, 1) != 0 )
+ break;
+ ret = pirq_guest_eoi(current->domain, eoi.irq);
+ break;
+ }
+
+ /* Legacy since 0x00030202. */
+ case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
ret = pirq_guest_unmask(current->domain);
break;
+ }
- case PHYSDEVOP_IRQ_STATUS_QUERY:
- irq = op.u.irq_status_query.irq;
+ case PHYSDEVOP_irq_status_query: {
+ struct physdev_irq_status_query irq_status_query;
+ ret = -EFAULT;
+ if ( copy_from_guest(&irq_status_query, arg, 1) != 0 )
+ break;
+ irq = irq_status_query.irq;
ret = -EINVAL;
if ( (irq < 0) || (irq >= NR_IRQS) )
break;
- op.u.irq_status_query.flags = 0;
+ irq_status_query.flags = 0;
if ( pirq_acktype(irq) != 0 )
- op.u.irq_status_query.flags |= PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY;
- ret = 0;
+ irq_status_query.flags |= XENIRQSTAT_needs_eoi;
+ ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
break;
+ }
- case PHYSDEVOP_APIC_READ:
+ case PHYSDEVOP_apic_read: {
+ struct physdev_apic apic;
+ ret = -EFAULT;
+ if ( copy_from_guest(&apic, arg, 1) != 0 )
+ break;
ret = -EPERM;
if ( !IS_PRIV(current->domain) )
break;
- ret = ioapic_guest_read(
- op.u.apic_op.apic_physbase,
- op.u.apic_op.reg,
- &op.u.apic_op.value);
+ ret = ioapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
+ if ( copy_to_guest(arg, &apic, 1) != 0 )
+ ret = -EFAULT;
break;
+ }
- case PHYSDEVOP_APIC_WRITE:
+ case PHYSDEVOP_apic_write: {
+ struct physdev_apic apic;
+ ret = -EFAULT;
+ if ( copy_from_guest(&apic, arg, 1) != 0 )
+ break;
ret = -EPERM;
if ( !IS_PRIV(current->domain) )
break;
- ret = ioapic_guest_write(
- op.u.apic_op.apic_physbase,
- op.u.apic_op.reg,
- op.u.apic_op.value);
+ ret = ioapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
break;
+ }
- case PHYSDEVOP_ASSIGN_VECTOR:
+ case PHYSDEVOP_alloc_irq_vector: {
+ struct physdev_irq irq_op;
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&irq_op, arg, 1) != 0 )
+ break;
+
+ ret = -EPERM;
if ( !IS_PRIV(current->domain) )
- return -EPERM;
+ break;
- if ( (irq = op.u.irq_op.irq) >= NR_IRQS )
- return -EINVAL;
+ ret = -EINVAL;
+ if ( (irq = irq_op.irq) >= NR_IRQS )
+ break;
- op.u.irq_op.vector = assign_irq_vector(irq);
- ret = 0;
+ irq_op.vector = assign_irq_vector(irq);
+ ret = copy_to_guest(arg, &irq_op, 1) ? -EFAULT : 0;
break;
+ }
- case PHYSDEVOP_SET_IOPL:
+ case PHYSDEVOP_set_iopl: {
+ struct physdev_set_iopl set_iopl;
+ ret = -EFAULT;
+ if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
+ break;
ret = -EINVAL;
- if ( op.u.set_iopl.iopl > 3 )
+ if ( set_iopl.iopl > 3 )
break;
ret = 0;
- current->arch.iopl = op.u.set_iopl.iopl;
+ current->arch.iopl = set_iopl.iopl;
break;
+ }
- case PHYSDEVOP_SET_IOBITMAP:
+ case PHYSDEVOP_set_iobitmap: {
+ struct physdev_set_iobitmap set_iobitmap;
+ ret = -EFAULT;
+ if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
+ break;
ret = -EINVAL;
- if ( !access_ok(op.u.set_iobitmap.bitmap, IOBMP_BYTES) ||
- (op.u.set_iobitmap.nr_ports > 65536) )
+ if ( !access_ok(set_iobitmap.bitmap, IOBMP_BYTES) ||
+ (set_iobitmap.nr_ports > 65536) )
break;
ret = 0;
- current->arch.iobmp = op.u.set_iobitmap.bitmap;
- current->arch.iobmp_limit = op.u.set_iobitmap.nr_ports;
+ current->arch.iobmp = set_iobitmap.bitmap;
+ current->arch.iobmp_limit = set_iobitmap.nr_ports;
break;
+ }
+
default:
ret = -EINVAL;
break;
}
- if ( copy_to_guest(uop, &op, 1) )
- ret = -EFAULT;
-
return ret;
}
.long do_multicall
.long do_update_va_mapping
.long do_set_timer_op /* 15 */
- .long do_event_channel_op
+ .long do_event_channel_op_compat
.long do_xen_version
.long do_console_io
- .long do_physdev_op
+ .long do_physdev_op_compat
.long do_grant_table_op /* 20 */
.long do_vm_assist
.long do_update_va_mapping_otherdomain
.long do_arch_sched_op
.long do_callback_op /* 30 */
.long do_xenoprof_op
+ .long do_event_channel_op
+ .long do_physdev_op
.rept NR_hypercalls-((.-hypercall_table)/4)
.long do_ni_hypercall
.endr
.byte 2 /* do_multicall */
.byte 4 /* do_update_va_mapping */
.byte 2 /* do_set_timer_op */ /* 15 */
- .byte 1 /* do_event_channel_op */
+ .byte 1 /* do_event_channel_op_compat */
.byte 2 /* do_xen_version */
.byte 3 /* do_console_io */
- .byte 1 /* do_physdev_op */
+ .byte 1 /* do_physdev_op_compat */
.byte 3 /* do_grant_table_op */ /* 20 */
.byte 2 /* do_vm_assist */
.byte 5 /* do_update_va_mapping_otherdomain */
.byte 2 /* do_arch_sched_op */
.byte 2 /* do_callback_op */ /* 30 */
.byte 2 /* do_xenoprof_op */
+ .byte 2 /* do_event_channel_op */
+ .byte 2 /* do_physdev_op */
.rept NR_hypercalls-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
.quad do_multicall
.quad do_update_va_mapping
.quad do_set_timer_op /* 15 */
- .quad do_event_channel_op
+ .quad do_event_channel_op_compat
.quad do_xen_version
.quad do_console_io
- .quad do_physdev_op
+ .quad do_physdev_op_compat
.quad do_grant_table_op /* 20 */
.quad do_vm_assist
.quad do_update_va_mapping_otherdomain
.quad do_arch_sched_op
.quad do_callback_op /* 30 */
.quad do_xenoprof_op
+ .quad do_event_channel_op
+ .quad do_physdev_op
.rept NR_hypercalls-((.-hypercall_table)/8)
.quad do_ni_hypercall
.endr
.byte 2 /* do_multicall */
.byte 3 /* do_update_va_mapping */
.byte 1 /* do_set_timer_op */ /* 15 */
- .byte 1 /* do_event_channel_op */
+ .byte 1 /* do_event_channel_op_compat */
.byte 2 /* do_xen_version */
.byte 3 /* do_console_io */
- .byte 1 /* do_physdev_op */
+ .byte 1 /* do_physdev_op_compat */
.byte 3 /* do_grant_table_op */ /* 20 */
.byte 2 /* do_vm_assist */
.byte 4 /* do_update_va_mapping_otherdomain */
.byte 2 /* do_arch_sched_op */
.byte 2 /* do_callback_op */ /* 30 */
.byte 2 /* do_xenoprof_op */
+ .byte 2 /* do_event_channel_op */
+ .byte 2 /* do_physdev_op */
.rept NR_hypercalls-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
struct domain *d;
int port;
domid_t dom = alloc->dom;
- long rc = 0;
+ long rc;
+
+ if ( (rc = acm_pre_eventchannel_unbound(dom, alloc->remote_dom)) != 0 )
+ return rc;
if ( dom == DOMID_SELF )
dom = current->domain->domain_id;
struct domain *ld = current->domain, *rd;
int lport, rport = bind->remote_port;
domid_t rdom = bind->remote_dom;
- long rc = 0;
+ long rc;
+
+ if ( (rc = acm_pre_eventchannel_interdomain(rdom)) != 0 )
+ return rc;
if ( rdom == DOMID_SELF )
rdom = current->domain->domain_id;
int port, virq = bind->virq, vcpu = bind->vcpu;
long rc = 0;
- if ( virq >= ARRAY_SIZE(v->virq_to_evtchn) )
+ if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
return -EINVAL;
if ( virq_is_global(virq) && (vcpu != 0) )
return -EINVAL;
- if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || ((v = d->vcpu[vcpu]) == NULL) )
+ if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
+ ((v = d->vcpu[vcpu]) == NULL) )
return -ENOENT;
spin_lock(&d->evtchn_lock);
int port, vcpu = bind->vcpu;
long rc = 0;
- if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) )
+ if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
+ (d->vcpu[vcpu] == NULL) )
return -ENOENT;
spin_lock(&d->evtchn_lock);
int port, pirq = bind->pirq;
long rc;
- if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
+ if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
return -EINVAL;
if ( !irq_access_permitted(d, pirq) )
}
-long do_event_channel_op(XEN_GUEST_HANDLE(evtchn_op_t) uop)
+long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
long rc;
- struct evtchn_op op;
-
- if ( copy_from_guest(&op, uop, 1) != 0 )
- return -EFAULT;
- if (acm_pre_event_channel(&op))
- return -EACCES;
-
- switch ( op.cmd )
+ switch ( cmd )
{
- case EVTCHNOP_alloc_unbound:
- rc = evtchn_alloc_unbound(&op.u.alloc_unbound);
- if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
+ case EVTCHNOP_alloc_unbound: {
+ struct evtchn_alloc_unbound alloc_unbound;
+ if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
+ return -EFAULT;
+ rc = evtchn_alloc_unbound(&alloc_unbound);
+ if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
+ }
- case EVTCHNOP_bind_interdomain:
- rc = evtchn_bind_interdomain(&op.u.bind_interdomain);
- if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
+ case EVTCHNOP_bind_interdomain: {
+ struct evtchn_bind_interdomain bind_interdomain;
+ if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
+ return -EFAULT;
+ rc = evtchn_bind_interdomain(&bind_interdomain);
+ if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
+ }
- case EVTCHNOP_bind_virq:
- rc = evtchn_bind_virq(&op.u.bind_virq);
- if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
+ case EVTCHNOP_bind_virq: {
+ struct evtchn_bind_virq bind_virq;
+ if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
+ return -EFAULT;
+ rc = evtchn_bind_virq(&bind_virq);
+ if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
+ }
- case EVTCHNOP_bind_ipi:
- rc = evtchn_bind_ipi(&op.u.bind_ipi);
- if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
+ case EVTCHNOP_bind_ipi: {
+ struct evtchn_bind_ipi bind_ipi;
+ if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
+ return -EFAULT;
+ rc = evtchn_bind_ipi(&bind_ipi);
+ if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
+ }
- case EVTCHNOP_bind_pirq:
- rc = evtchn_bind_pirq(&op.u.bind_pirq);
- if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
+ case EVTCHNOP_bind_pirq: {
+ struct evtchn_bind_pirq bind_pirq;
+ if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
+ return -EFAULT;
+ rc = evtchn_bind_pirq(&bind_pirq);
+ if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
+ }
- case EVTCHNOP_close:
- rc = evtchn_close(&op.u.close);
+ case EVTCHNOP_close: {
+ struct evtchn_close close;
+ if ( copy_from_guest(&close, arg, 1) != 0 )
+ return -EFAULT;
+ rc = evtchn_close(&close);
break;
+ }
- case EVTCHNOP_send:
- rc = evtchn_send(op.u.send.port);
+ case EVTCHNOP_send: {
+ struct evtchn_send send;
+ if ( copy_from_guest(&send, arg, 1) != 0 )
+ return -EFAULT;
+ rc = evtchn_send(send.port);
break;
+ }
- case EVTCHNOP_status:
- rc = evtchn_status(&op.u.status);
- if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
+ case EVTCHNOP_status: {
+ struct evtchn_status status;
+ if ( copy_from_guest(&status, arg, 1) != 0 )
+ return -EFAULT;
+ rc = evtchn_status(&status);
+ if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
rc = -EFAULT;
break;
+ }
- case EVTCHNOP_bind_vcpu:
- rc = evtchn_bind_vcpu(op.u.bind_vcpu.port, op.u.bind_vcpu.vcpu);
+ case EVTCHNOP_bind_vcpu: {
+ struct evtchn_bind_vcpu bind_vcpu;
+ if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
+ return -EFAULT;
+ rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
break;
+ }
- case EVTCHNOP_unmask:
- rc = evtchn_unmask(&op.u.unmask);
+ case EVTCHNOP_unmask: {
+ struct evtchn_unmask unmask;
+ if ( copy_from_guest(&unmask, arg, 1) != 0 )
+ return -EFAULT;
+ rc = evtchn_unmask(&unmask);
break;
+ }
default:
rc = -ENOSYS;
{ return; }
static inline void acm_fail_dom0_op(struct dom0_op *op, void *ssid)
{ return; }
-static inline int acm_pre_event_channel(struct evtchn_op *op)
+static inline int acm_pre_eventchannel_unbound(domid_t id1, domid_t id2)
+{ return 0; }
+static inline int acm_pre_eventchannel_interdomain(domid_t id)
{ return 0; }
static inline int acm_pre_grant_map_ref(domid_t id)
{ return 0; }
}
}
-static inline int acm_pre_event_channel(struct evtchn_op *op)
-{
- int ret = -EACCES;
-
- switch(op->cmd) {
- case EVTCHNOP_alloc_unbound:
- ret = acm_pre_eventchannel_unbound(
- op->u.alloc_unbound.dom,
- op->u.alloc_unbound.remote_dom);
- break;
- case EVTCHNOP_bind_interdomain:
- ret = acm_pre_eventchannel_interdomain(
- op->u.bind_interdomain.remote_dom);
- break;
- default:
- ret = 0; /* ok */
- }
- return ret;
-}
-
static inline int acm_pre_grant_map_ref(domid_t id)
{
if ( (acm_primary_ops->pre_grant_map_ref != NULL) &&
#include <asm/types.h>
#include <asm/vcpu.h>
+extern long
+do_event_channel_op_compat(
+ XEN_GUEST_HANDLE(evtchn_op_t) uop);
+
extern int
vmx_do_mmu_update(
mmu_update_t *ureqs,
#include <public/physdev.h>
+extern long
+do_event_channel_op_compat(
+ XEN_GUEST_HANDLE(evtchn_op_t) uop);
+
+extern long
+do_physdev_op_compat(
+ XEN_GUEST_HANDLE(physdev_op_t) uop);
+
extern long
do_set_trap_table(
XEN_GUEST_HANDLE(trap_info_t) traps);
extern long
do_physdev_op(
- XEN_GUEST_HANDLE(physdev_op_t) uop);
+ int cmd, XEN_GUEST_HANDLE(void) arg);
extern int
do_update_va_mapping_otherdomain(
#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
#define __XEN_PUBLIC_EVENT_CHANNEL_H__
+/*
+ * Prototype for this hypercall is:
+ * int event_channel_op(int cmd, void *args)
+ * @cmd == EVTCHNOP_??? (event-channel operation).
+ * @args == Operation-specific extra arguments (NULL if none).
+ */
+
typedef uint32_t evtchn_port_t;
DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
evtchn_port_t port;
} evtchn_unmask_t;
+/*
+ * Argument to event_channel_op_compat() hypercall. Superseded by new
+ * event_channel_op() hypercall since 0x00030202.
+ */
typedef struct evtchn_op {
uint32_t cmd; /* EVTCHNOP_* */
union {
#ifndef __XEN_PUBLIC_PHYSDEV_H__
#define __XEN_PUBLIC_PHYSDEV_H__
-/* Commands to HYPERVISOR_physdev_op() */
-#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
-#define PHYSDEVOP_IRQ_STATUS_QUERY 5
-#define PHYSDEVOP_SET_IOPL 6
-#define PHYSDEVOP_SET_IOBITMAP 7
-#define PHYSDEVOP_APIC_READ 8
-#define PHYSDEVOP_APIC_WRITE 9
-#define PHYSDEVOP_ASSIGN_VECTOR 10
+/*
+ * Prototype for this hypercall is:
+ * int physdev_op(int cmd, void *args)
+ * @cmd == PHYSDEVOP_??? (physdev operation).
+ * @args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Notify end-of-interrupt (EOI) for the specified IRQ.
+ * @arg == pointer to physdev_eoi structure.
+ */
+#define PHYSDEVOP_eoi 12
+typedef struct physdev_eoi {
+ /* IN */
+ uint32_t irq;
+} physdev_eoi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
-typedef struct physdevop_irq_status_query {
+/*
+ * Query the status of an IRQ line.
+ * @arg == pointer to physdev_irq_status_query structure.
+ */
+#define PHYSDEVOP_irq_status_query 5
+typedef struct physdev_irq_status_query {
/* IN */
uint32_t irq;
/* OUT */
-/* Need to call PHYSDEVOP_IRQ_UNMASK_NOTIFY when the IRQ has been serviced? */
-#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY (1<<0)
- uint32_t flags;
-} physdevop_irq_status_query_t;
+ uint32_t flags; /* XENIRQSTAT_* */
+} physdev_irq_status_query_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
-typedef struct physdevop_set_iopl {
+/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
+#define _XENIRQSTAT_needs_eoi (0)
+#define XENIRQSTAT_needs_eoi (1<<_XENIRQSTAT_needs_eoi)
+
+/*
+ * Set the current VCPU's I/O privilege level.
+ * @arg == pointer to physdev_set_iopl structure.
+ */
+#define PHYSDEVOP_set_iopl 6
+typedef struct physdev_set_iopl {
/* IN */
uint32_t iopl;
-} physdevop_set_iopl_t;
+} physdev_set_iopl_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
-typedef struct physdevop_set_iobitmap {
+/*
+ * Set the current VCPU's I/O-port permissions bitmap.
+ * @arg == pointer to physdev_set_iobitmap structure.
+ */
+#define PHYSDEVOP_set_iobitmap 7
+typedef struct physdev_set_iobitmap {
/* IN */
uint8_t *bitmap;
uint32_t nr_ports;
-} physdevop_set_iobitmap_t;
+} physdev_set_iobitmap_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
-typedef struct physdevop_apic {
+/*
+ * Read or write an IO-APIC register.
+ * @arg == pointer to physdev_apic structure.
+ */
+#define PHYSDEVOP_apic_read 8
+#define PHYSDEVOP_apic_write 9
+typedef struct physdev_apic {
/* IN */
unsigned long apic_physbase;
uint32_t reg;
/* IN or OUT */
uint32_t value;
-} physdevop_apic_t;
+} physdev_apic_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
-typedef struct physdevop_irq {
+/*
+ * Allocate or free a physical upcall vector for the specified IRQ line.
+ * @arg == pointer to physdev_irq structure.
+ */
+#define PHYSDEVOP_alloc_irq_vector 10
+#define PHYSDEVOP_free_irq_vector 11
+typedef struct physdev_irq {
/* IN */
uint32_t irq;
- /* OUT */
+ /* IN or OUT */
uint32_t vector;
-} physdevop_irq_t;
+} physdev_irq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
+/*
+ * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op()
+ * hypercall since 0x00030202.
+ */
typedef struct physdev_op {
uint32_t cmd;
union {
- physdevop_irq_status_query_t irq_status_query;
- physdevop_set_iopl_t set_iopl;
- physdevop_set_iobitmap_t set_iobitmap;
- physdevop_apic_t apic_op;
- physdevop_irq_t irq_op;
+ physdev_irq_status_query_t irq_status_query;
+ physdev_set_iopl_t set_iopl;
+ physdev_set_iobitmap_t set_iobitmap;
+ physdev_apic_t apic_op;
+ physdev_irq_t irq_op;
} u;
} physdev_op_t;
DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
+/*
+ * Notify that some PIRQ-bound event channels have been unmasked.
+ * ** This command is obsolete since interface version 0x00030202 and is **
+ * ** unsupported by newer versions of Xen. **
+ */
+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
+
+/*
+ * These all-capitals physdev operation names are superseded by the new names
+ * (defined above) since interface version 0x00030202.
+ */
+#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
+#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
+#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
+#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
+#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
+#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
+
#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
/*
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
-#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030201
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030202
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */
typedef type * __guest_handle_ ## name
#endif
+/* New event-channel and physdev hypercalls introduced in 0x00030202. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030202
+#undef __HYPERVISOR_event_channel_op
+#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
+#undef __HYPERVISOR_physdev_op
+#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
+#endif
+
#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
#define __HYPERVISOR_stack_switch 3
#define __HYPERVISOR_set_callbacks 4
#define __HYPERVISOR_fpu_taskswitch 5
-#define __HYPERVISOR_sched_op_compat 6 /* compat as of 0x00030101 */
+#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
#define __HYPERVISOR_dom0_op 7
#define __HYPERVISOR_set_debugreg 8
#define __HYPERVISOR_get_debugreg 9
#define __HYPERVISOR_multicall 13
#define __HYPERVISOR_update_va_mapping 14
#define __HYPERVISOR_set_timer_op 15
-#define __HYPERVISOR_event_channel_op 16
+#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
#define __HYPERVISOR_xen_version 17
#define __HYPERVISOR_console_io 18
-#define __HYPERVISOR_physdev_op 19
+#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
#define __HYPERVISOR_grant_table_op 20
#define __HYPERVISOR_vm_assist 21
#define __HYPERVISOR_update_va_mapping_otherdomain 22
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_callback_op 30
#define __HYPERVISOR_xenoprof_op 31
+#define __HYPERVISOR_event_channel_op 32
+#define __HYPERVISOR_physdev_op 33
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
#include <xen/types.h>
#include <xen/time.h>
#include <public/xen.h>
+#include <public/dom0_ops.h>
#include <public/acm_ops.h>
#include <public/event_channel.h>
#include <asm/hypercall.h>
extern long
do_event_channel_op(
- XEN_GUEST_HANDLE(evtchn_op_t) uop);
+ int cmd, XEN_GUEST_HANDLE(void) arg);
extern long
do_xen_version(
struct domain;
struct vcpu;
-extern int pirq_guest_unmask(struct domain *p);
-extern int pirq_guest_bind(struct vcpu *p, int irq, int will_share);
-extern int pirq_guest_unbind(struct domain *p, int irq);
+extern int pirq_guest_eoi(struct domain *d, int irq);
+extern int pirq_guest_unmask(struct domain *d);
+extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
+extern int pirq_guest_unbind(struct domain *d, int irq);
#endif /* __XEN_IRQ_H__ */